# Overall accuracy (mean and SD) for every word Category (Adjs/Nouns/Verbs)
# by Task (Concrete vs. Valence judgment) cell.
agr <- d %>%
group_by(Category,Task) %>%
summarize(MeanAccuracy = mean(Accuracy),
SD = sd(Accuracy))
## `summarise()` has grouped output by 'Category'. You can override using the
## `.groups` argument.
print(agr)
## # A tibble: 6 × 4
## # Groups: Category [3]
## Category Task MeanAccuracy SD
## <chr> <chr> <dbl> <dbl>
## 1 Adjs Concrete 0.801 0.399
## 2 Adjs Valence 0.934 0.249
## 3 Nouns Concrete 0.897 0.304
## 4 Nouns Valence 0.960 0.195
## 5 Verbs Concrete 0.917 0.276
## 6 Verbs Valence 0.956 0.205
# Mean accuracy per Task with bootstrapped 95% CI bounds (ci.low/ci.high
# return distances from the mean), shown as a bar plot with error bars.
agr <- d %>%
  group_by(Task) %>%
  reframe(
    MeanAccuracy = mean(Accuracy),
    CILow = ci.low(Accuracy),
    CIHigh = ci.high(Accuracy)
  ) %>%
  mutate(
    YMin = MeanAccuracy - CILow,
    YMax = MeanAccuracy + CIHigh
  )
# View(agr)
dodge <- position_dodge(0.9)
ggplot(agr, aes(x = Task, y = MeanAccuracy, fill = Task)) +
  geom_bar(stat = "identity", position = dodge) +
  geom_errorbar(aes(ymin = YMin, ymax = YMax), width = 0.25, position = dodge)
# theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
# guides(fill = "none")
# Mean accuracy with bootstrapped 95% CIs for every Task x Category cell,
# shown as category bars dodged within each Task.
agr <- d %>%
  group_by(Task, Category) %>%
  reframe(
    MeanAccuracy = mean(Accuracy),
    CILow = ci.low(Accuracy),
    CIHigh = ci.high(Accuracy)
  ) %>%
  mutate(
    YMin = MeanAccuracy - CILow,
    YMax = MeanAccuracy + CIHigh
  )
# View(agr)
dodge <- position_dodge(0.9)
ggplot(agr, aes(x = Task, y = MeanAccuracy, fill = Category)) +
  geom_bar(stat = "identity", position = dodge) +
  geom_errorbar(aes(ymin = YMin, ymax = YMax), width = 0.25, position = dodge)
# ggsave("../graphs/conc-abs_categoryXtask.pdf",width = 5, height = 3)
# By-word mean accuracy and mean RT; scatterplot of accuracy against RT with
# per-Category regression lines, faceted by Task.
agr <- d %>%
group_by(Word,Category,Task) %>%
summarize(MeanAccuracy = mean(Accuracy),
MeanReactionTime = mean(ReactionTime))
## `summarise()` has grouped output by 'Word', 'Category'. You can override using
## the `.groups` argument.
ggplot(agr, aes(x = MeanReactionTime, y = MeanAccuracy, color = Category)) +
geom_point() +
geom_smooth(method = "lm", se = FALSE) +
geom_text(aes(label = Word), vjust = -0.5, hjust = 1.5) +
facet_wrap(~Task)
## `geom_smooth()` using formula = 'y ~ x'
# guides(legend = "none")
# theme(legend.position = "none") # Remove the legend
# ggplot(agr, aes(x = MeanReactionTime, y = MeanAccuracy, color = Task)) +
# geom_point() +
# geom_smooth(aes(color = Task), method = "lm", se = FALSE, size = 1.2) + # Darker line
# geom_text(aes(label = Word), vjust = -0.5, hjust = 1.5) +
# scale_color_manual(values = c("Adjs" = "red",
# "Nouns" = "green",
# "Verbs" = "blue")) + # Adjust colors
# theme(legend.position = "none") # Remove the legend
# ggsave("../graphs/exp1b_accXrt.pdf",width = 5, height = 3)
# Compute highest accuracy for Concrete, keeping top 10 words per Category.
# The resulting word list is used downstream to restrict analyses to words
# that are reliably judged concrete/abstract.
# NOTE(review): slice_max() keeps ties by default (with_ties = TRUE), so more
# than 10 words per category are possible -- the later 33-word count suggests
# this happened; confirm that is intended.
concrete_accuracy <- d %>%
group_by(Category, Word, Task) %>%
summarize(MeanAccuracy = mean(Accuracy),
MeanReactionTime = mean(ReactionTime), .groups = "drop") %>%
filter(Task == "Concrete") %>%
select(Category, Word, MeanAccuracy) %>%
rename(ConcreteAccuracy = MeanAccuracy) %>%
group_by(Category) %>%
slice_max(order_by = ConcreteAccuracy, n = 10) # Get top 10 per category
# Restrict to the top concrete-accuracy words and plot by-word accuracy
# against RT, faceted by Category; word labels are colored by Task.
agr <- d %>%
filter(Word %in% concrete_accuracy$Word) %>%
group_by(Word,Task,Category) %>%
summarize(MeanAccuracy = mean(Accuracy),
MeanReactionTime = mean(ReactionTime))
## `summarise()` has grouped output by 'Word', 'Task'. You can override using the
## `.groups` argument.
print(agr)
## # A tibble: 70 × 5
## # Groups: Word, Task [66]
## Word Task Category MeanAccuracy MeanReactionTime
## <chr> <chr> <chr> <dbl> <dbl>
## 1 awful Concrete Adjs 0.933 970.
## 2 awful Valence Adjs 0.983 772.
## 3 bleed Concrete Verbs 0.967 1031.
## 4 bleed Valence Verbs 0.967 818.
## 5 cherish Concrete Verbs 0.967 1128.
## 6 cherish Valence Verbs 1 794.
## 7 chocolate Concrete Nouns 1 957
## 8 chocolate Valence Nouns 0.933 720.
## 9 compassion Concrete Nouns 0.983 804.
## 10 compassion Valence Nouns 0.983 834.
## # ℹ 60 more rows
ggplot(agr, aes(x = MeanReactionTime, y = MeanAccuracy)) +
geom_point() +
geom_smooth(method = "lm", se = FALSE, color = "black") +
geom_text(aes(label = Word, color = Task), vjust = -0.5, hjust = 1.5) +
facet_wrap(~Category) +
# BUG FIX: guides(legend = "none") is invalid -- "legend" is not an
# aesthetic, so that call silently did nothing. Suppress the color guide
# explicitly (theme() below also hides all legends, kept for safety).
guides(color = "none") +
theme(legend.position = "none") # Remove the legend
## `geom_smooth()` using formula = 'y ~ x'
# ggsave("../graphs/ConcAbs_accXrt.pdf",width = 5, height = 3)
# Same by-word accuracy x RT scatter as above, but colored by Category and
# faceted by Task (restricted to the top concrete-accuracy words).
# NOTE(review): group_by() precedes filter() here; the filter does not depend
# on groups, so the result is the same as filtering first.
agr <- d %>%
group_by(Word,Category,Task) %>%
filter(Word %in% concrete_accuracy$Word) %>%
summarize(MeanAccuracy = mean(Accuracy),
MeanReactionTime = mean(ReactionTime))
## `summarise()` has grouped output by 'Word', 'Category'. You can override using
## the `.groups` argument.
ggplot(agr, aes(x = MeanReactionTime, y = MeanAccuracy, color = Category)) +
geom_point() +
geom_smooth(method = "lm", se = FALSE) +
geom_text(aes(label = Word), vjust = -0.5, hjust = 1.5) +
facet_wrap(~Task)
## `geom_smooth()` using formula = 'y ~ x'
# ggsave("../graphs/ConcAbs_catXaccXrt.pdf",width = 5, height = 2)
# By-word accuracy x RT collapsed over Category, with separate regression
# lines per Task (top concrete-accuracy words only).
agr <- d %>%
filter(Word %in% concrete_accuracy$Word) %>%
group_by(Word,Task) %>%
summarize(MeanAccuracy = mean(Accuracy),
MeanReactionTime = mean(ReactionTime))
## `summarise()` has grouped output by 'Word'. You can override using the
## `.groups` argument.
print(agr)
## # A tibble: 66 × 4
## # Groups: Word [33]
## Word Task MeanAccuracy MeanReactionTime
## <chr> <chr> <dbl> <dbl>
## 1 awful Concrete 0.933 970.
## 2 awful Valence 0.983 772.
## 3 bleed Concrete 0.967 1031.
## 4 bleed Valence 0.967 818.
## 5 cherish Concrete 0.967 1128.
## 6 cherish Valence 1 794.
## 7 chocolate Concrete 1 957
## 8 chocolate Valence 0.933 720.
## 9 compassion Concrete 0.983 804.
## 10 compassion Valence 0.983 834.
## # ℹ 56 more rows
ggplot(agr, aes(x = MeanReactionTime, y = MeanAccuracy, color = Task)) +
geom_point() +
geom_smooth(method = "lm", se = FALSE) + # Separate lines for each Task
geom_text(aes(label = Word), vjust = -0.5, hjust = 1.5) + # Task color inherited from ggplot()
theme(legend.position = "none") # Remove the legend
## `geom_smooth()` using formula = 'y ~ x'
# ggplot(agr, aes(x = MeanReactionTime, y = MeanAccuracy)) +
# geom_point() +
# geom_smooth(method = "lm", se = FALSE, color = "black") +
# geom_text(aes(label = Word, color = Task), vjust = -0.5, hjust = 1.5) +
# guides(legend = "none") +
# theme(legend.position = "none") # Remove the legend
# ggsave("../graphs/ConcAbs_accXrt.pdf",width = 5, height = 3)
# Per-word mean accuracy (top concrete-accuracy words), bars dodged by Task.
agr <- d %>%
  filter(Word %in% concrete_accuracy$Word) %>%
  group_by(Word, Task) %>%
  reframe(
    MeanAccuracy = mean(Accuracy),
    CILow = ci.low(Accuracy),
    CIHigh = ci.high(Accuracy)
  ) %>%
  mutate(
    YMin = MeanAccuracy - CILow,
    YMax = MeanAccuracy + CIHigh
  )
# View(agr)
dodge <- position_dodge(0.9)
ggplot(agr, aes(x = Word, y = MeanAccuracy, fill = Task)) +
  geom_bar(stat = "identity", position = dodge) +
  geom_errorbar(aes(ymin = YMin, ymax = YMax), width = 0.25, position = dodge) +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))

# Per-word mean RT for the same word subset, bars dodged by Category and
# faceted by Task.
agr <- d %>%
  filter(Word %in% concrete_accuracy$Word) %>%
  group_by(Word, Task, Category) %>%
  reframe(
    MeanReactionTime = mean(ReactionTime),
    CILow = ci.low(ReactionTime),
    CIHigh = ci.high(ReactionTime)
  ) %>%
  mutate(
    YMin = MeanReactionTime - CILow,
    YMax = MeanReactionTime + CIHigh
  )
# View(agr)
dodge <- position_dodge(0.9)
ggplot(agr, aes(x = Word, y = MeanReactionTime, fill = Category)) +
  geom_bar(stat = "identity", position = dodge) +
  geom_errorbar(aes(ymin = YMin, ymax = YMax), width = 0.25, position = dodge) +
  facet_wrap(~Task) +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))
# Column inventory of the raw data frame, for reference.
names(d)
## [1] "...1" "ID.true" "Word" "Label"
## [5] "ConcValCombo" "Task" "BlockOrder" "Group"
## [9] "Response" "Accuracy" "EventTime" "Value"
## [13] "RT" "ReactionTime" "Key_value_F" "Key_value_J"
## [17] "Comments" "LogReactionTime" "LogRT" "TrialNumber"
## [21] "Category"
# Mixed model: log RT predicted by centered accuracy, centered task, and
# their interaction, with by-word and by-participant random slopes for task.
dcen <- d %>%
filter(Word %in% concrete_accuracy$Word) %>%
mutate(Word = as.factor(Word),
ID.true = as.factor(ID.true),
Task = as.factor(Task),
Category = as.factor(Category),
# Centering: factors are first converted to their numeric level codes
# (1, 2, ...) and then mean-centered.
cAccuracy = as.numeric(Accuracy)-mean(as.numeric(Accuracy)),
cTask = as.numeric(Task)-mean(as.numeric(Task)))
m <- lmer(LogReactionTime ~ cAccuracy*cTask + (1+cTask|Word) + (1+cTask|ID.true), data = dcen)
summary(m)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: LogReactionTime ~ cAccuracy * cTask + (1 + cTask | Word) + (1 +
## cTask | ID.true)
## Data: dcen
##
## REML criterion at convergence: 3098.8
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -10.1132 -0.5633 -0.1484 0.3715 10.3296
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## ID.true (Intercept) 0.040580 0.20145
## cTask 0.026792 0.16368 -0.16
## Word (Intercept) 0.001337 0.03656
## cTask 0.005284 0.07269 0.14
## Residual 0.112267 0.33506
## Number of obs: 4200, groups: ID.true, 60; Word, 33
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 6.68886 0.02729 64.64643 245.125 < 2e-16 ***
## cAccuracy -0.02618 0.02988 4123.79642 -0.876 0.380975
## cTask -0.10804 0.02678 66.64631 -4.034 0.000143 ***
## cAccuracy:cTask -0.06553 0.05966 4129.22350 -1.098 0.272073
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) cAccrc cTask
## cAccuracy -0.002
## cTask -0.108 0.021
## cAccrcy:cTs 0.010 -0.256 -0.004
# Likelihood-ratio test for the accuracy terms: compare against the nested
# model without cAccuracy and its interaction (anova() refits with ML).
m.s <- lmer(LogReactionTime ~ cTask + (1+cTask|Word) + (1+cTask|ID.true), data = dcen)
anova(m,m.s, test="chisq")
## refitting model(s) with ML (instead of REML)
## Data: dcen
## Models:
## m.s: LogReactionTime ~ cTask + (1 + cTask | Word) + (1 + cTask | ID.true)
## m: LogReactionTime ~ cAccuracy * cTask + (1 + cTask | Word) + (1 + cTask | ID.true)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m.s 9 3099.6 3156.7 -1540.8 3081.6
## m 11 3101.0 3170.8 -1539.5 3079.0 2.6386 2 0.2673
# Add Category (treatment-coded; reference level is Adjs, the first factor
# level) and its interactions with the centered predictors.
m.c <- lmer(LogReactionTime ~ cAccuracy*cTask*Category + (1+cTask|Word) + (1+cTask|ID.true), data = dcen)
summary(m.c)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: LogReactionTime ~ cAccuracy * cTask * Category + (1 + cTask |
## Word) + (1 + cTask | ID.true)
## Data: dcen
##
## REML criterion at convergence: 3114.3
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -10.1223 -0.5641 -0.1454 0.3774 10.3234
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## ID.true (Intercept) 0.039892 0.19973
## cTask 0.027225 0.16500 -0.14
## Word (Intercept) 0.001392 0.03731
## cTask 0.005248 0.07244 0.17
## Residual 0.112175 0.33493
## Number of obs: 4200, groups: ID.true, 60; Word, 33
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 6.66746 0.04720 64.03574 141.263 < 2e-16
## cAccuracy -0.10854 0.04715 4135.09816 -2.302 0.02139
## cTask -0.10275 0.04756 65.81477 -2.160 0.03438
## CategoryNouns -0.02131 0.06628 62.71939 -0.322 0.74884
## CategoryVerbs 0.08339 0.06636 62.96993 1.257 0.21352
## cAccuracy:cTask -0.01895 0.09406 4123.27195 -0.202 0.84031
## cAccuracy:CategoryNouns 0.08942 0.08139 4105.92872 1.099 0.27195
## cAccuracy:CategoryVerbs 0.19142 0.07165 4114.01395 2.672 0.00758
## cTask:CategoryNouns 0.02179 0.06545 66.63913 0.333 0.74023
## cTask:CategoryVerbs -0.03731 0.06576 66.99367 -0.567 0.57237
## cAccuracy:cTask:CategoryNouns -0.07148 0.16256 4115.17490 -0.440 0.66019
## cAccuracy:cTask:CategoryVerbs -0.06294 0.14303 4120.49712 -0.440 0.65993
##
## (Intercept) ***
## cAccuracy *
## cTask *
## CategoryNouns
## CategoryVerbs
## cAccuracy:cTask
## cAccuracy:CategoryNouns
## cAccuracy:CategoryVerbs **
## cTask:CategoryNouns
## cTask:CategoryVerbs
## cAccuracy:cTask:CategoryNouns
## cAccuracy:cTask:CategoryVerbs
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) cAccrc cTask CtgryN CtgryV cAcc:T cAc:CN cAc:CV cTs:CN
## cAccuracy 0.014
## cTask -0.084 0.005
## CategoryNns -0.712 -0.010 0.060
## CategryVrbs -0.711 -0.010 0.060 0.512
## cAccrcy:cTs 0.003 -0.107 0.028 -0.002 -0.002
## cAccrcy:CtN -0.008 -0.579 -0.003 -0.009 0.004 0.062
## cAccrcy:CtV -0.009 -0.658 -0.003 0.007 0.002 0.070 0.381
## cTsk:CtgryN 0.061 -0.004 -0.727 -0.092 -0.041 -0.020 0.041 0.006
## cTsk:CtgryV 0.061 -0.004 -0.723 -0.041 -0.090 -0.020 0.006 0.001 0.546
## cAccrc:T:CN -0.002 0.062 -0.016 0.020 0.003 -0.579 -0.473 -0.041 -0.018
## cAccrc:T:CV -0.002 0.070 -0.018 0.003 0.000 -0.658 -0.041 -0.038 0.015
## cTs:CV cA:T:CN
## cAccuracy
## cTask
## CategoryNns
## CategryVrbs
## cAccrcy:cTs
## cAccrcy:CtN
## cAccrcy:CtV
## cTsk:CtgryN
## cTsk:CtgryV
## cAccrc:T:CN 0.008
## cAccrc:T:CV 0.003 0.381
# Make Category a factor on the full data frame (needed for the per-category
# subset analyses below).
d$Category = as.factor(d$Category)
str(d$Category)
## Factor w/ 3 levels "Adjs","Nouns",..: 3 3 3 3 3 3 3 3 3 3 ...
# Same accuracy x task model, Adjs subset only. Note the singular fit:
# the by-word intercept variance is estimated at zero.
dcen <- d %>%
filter(Word %in% concrete_accuracy$Word) %>%
filter(Category == "Adjs") %>%
mutate(Word = as.factor(Word),
ID.true = as.factor(ID.true),
Task = as.factor(Task),
Category = as.factor(Category),
cAccuracy = as.numeric(Accuracy)-mean(as.numeric(Accuracy)),
cTask = as.numeric(Task)-mean(as.numeric(Task)))
m <- lmer(LogReactionTime ~ cAccuracy*cTask + (1+cTask|Word) + (1+cTask|ID.true), data = dcen)
## boundary (singular) fit: see help('isSingular')
## Warning: Model failed to converge with 1 negative eigenvalue: -7.8e+00
summary(m)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: LogReactionTime ~ cAccuracy * cTask + (1 + cTask | Word) + (1 +
## cTask | ID.true)
## Data: dcen
##
## REML criterion at convergence: 856.9
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.1537 -0.5446 -0.1372 0.3511 7.5771
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## ID.true (Intercept) 0.048745 0.22078
## cTask 0.040973 0.20242 -0.34
## Word (Intercept) 0.000000 0.00000
## cTask 0.004243 0.06514 NaN
## Residual 0.108071 0.32874
## Number of obs: 1200, groups: ID.true, 20; Word, 10
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 6.66903 0.05027 18.99442 132.657 <2e-16 ***
## cAccuracy -0.11017 0.04628 1178.29554 -2.381 0.0174 *
## cTask -0.10249 0.05323 21.29784 -1.925 0.0676 .
## cAccuracy:cTask -0.01873 0.09207 1175.88757 -0.203 0.8388
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) cAccrc cTask
## cAccuracy 0.000
## cTask -0.285 0.007
## cAccrcy:cTs 0.004 -0.114 -0.001
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
# Likelihood-ratio test for the accuracy terms (Adjs subset).
m.s <- lmer(LogReactionTime ~ cTask + (1+cTask|Word) + (1+cTask|ID.true), data = dcen)
## boundary (singular) fit: see help('isSingular')
## Warning: Model failed to converge with 1 negative eigenvalue: -1.8e+00
anova(m,m.s, test="chisq")
## refitting model(s) with ML (instead of REML)
## Data: dcen
## Models:
## m.s: LogReactionTime ~ cTask + (1 + cTask | Word) + (1 + cTask | ID.true)
## m: LogReactionTime ~ cAccuracy * cTask + (1 + cTask | Word) + (1 + cTask | ID.true)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m.s 9 865.2 911.01 -423.60 847.2
## m 11 863.3 919.29 -420.65 841.3 5.9034 2 0.05225 .
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Same accuracy x task model, Verbs subset only.
dcen <- d %>%
filter(Word %in% concrete_accuracy$Word) %>%
filter(Category == "Verbs") %>%
mutate(Word = as.factor(Word),
ID.true = as.factor(ID.true),
Task = as.factor(Task),
Category = as.factor(Category),
cAccuracy = as.numeric(Accuracy)-mean(as.numeric(Accuracy)),
cTask = as.numeric(Task)-mean(as.numeric(Task)))
m <- lmer(LogReactionTime ~ cAccuracy*cTask + (1+cTask|Word) + (1+cTask|ID.true), data = dcen)
summary(m)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: LogReactionTime ~ cAccuracy * cTask + (1 + cTask | Word) + (1 +
## cTask | ID.true)
## Data: dcen
##
## REML criterion at convergence: 1056.9
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -10.0886 -0.5691 -0.1218 0.4304 10.3293
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## ID.true (Intercept) 0.043491 0.20855
## cTask 0.025435 0.15948 0.12
## Word (Intercept) 0.003336 0.05775
## cTask 0.005753 0.07585 -0.39
## Residual 0.110844 0.33293
## Number of obs: 1440, groups: ID.true, 20; Word, 12
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 6.75085 0.05029 22.98100 134.227 < 2e-16 ***
## cAccuracy 0.08468 0.05364 1399.56925 1.579 0.11462
## cTask -0.15476 0.04538 22.56421 -3.410 0.00244 **
## cAccuracy:cTask -0.08913 0.10714 1404.07656 -0.832 0.40563
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) cAccrc cTask
## cAccuracy 0.000
## cTask 0.027 0.000
## cAccrcy:cTs 0.000 0.014 0.000
# Likelihood-ratio test for the accuracy terms (Verbs subset).
m.s <- lmer(LogReactionTime ~ cTask + (1+cTask|Word) + (1+cTask|ID.true), data = dcen)
anova(m,m.s, test="chisq")
## refitting model(s) with ML (instead of REML)
## Data: dcen
## Models:
## m.s: LogReactionTime ~ cTask + (1 + cTask | Word) + (1 + cTask | ID.true)
## m: LogReactionTime ~ cAccuracy * cTask + (1 + cTask | Word) + (1 + cTask | ID.true)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m.s 9 1062.9 1110.4 -522.46 1044.9
## m 11 1063.7 1121.7 -520.86 1041.7 3.2031 2 0.2016
# Same accuracy x task model, Nouns subset only. Note the singular fit:
# the by-word intercept/slope correlation is estimated at 1.00.
dcen <- d %>%
filter(Word %in% concrete_accuracy$Word) %>%
filter(Category == "Nouns") %>%
mutate(Word = as.factor(Word),
ID.true = as.factor(ID.true),
Task = as.factor(Task),
Category = as.factor(Category),
cAccuracy = as.numeric(Accuracy)-mean(as.numeric(Accuracy)),
cTask = as.numeric(Task)-mean(as.numeric(Task)))
m <- lmer(LogReactionTime ~ cAccuracy*cTask + (1+cTask|Word) + (1+cTask|ID.true), data = dcen)
## boundary (singular) fit: see help('isSingular')
summary(m)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: LogReactionTime ~ cAccuracy * cTask + (1 + cTask | Word) + (1 +
## cTask | ID.true)
## Data: dcen
##
## REML criterion at convergence: 1180.1
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.4028 -0.5825 -0.1672 0.3682 9.4460
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## ID.true (Intercept) 0.0276381 0.16625
## cTask 0.0165818 0.12877 -0.16
## Word (Intercept) 0.0009145 0.03024
## cTask 0.0057894 0.07609 1.00
## Residual 0.1160707 0.34069
## Number of obs: 1560, groups: ID.true, 20; Word, 13
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 6.64526 0.03910 20.64477 169.956 <2e-16 ***
## cAccuracy -0.02485 0.06721 1520.21240 -0.370 0.7116
## cTask -0.07492 0.03976 22.47093 -1.884 0.0725 .
## cAccuracy:cTask -0.08226 0.13434 1523.63135 -0.612 0.5404
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) cAccrc cTask
## cAccuracy -0.025
## cTask 0.004 0.074
## cAccrcy:cTs 0.037 -0.655 -0.048
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
# Likelihood-ratio test for the accuracy terms (Nouns subset).
m.s <- lmer(LogReactionTime ~ cTask + (1+cTask|Word) + (1+cTask|ID.true), data = dcen)
## boundary (singular) fit: see help('isSingular')
## Warning: Model failed to converge with 1 negative eigenvalue: -3.9e+01
anova(m,m.s, test="chisq")
## refitting model(s) with ML (instead of REML)
## Data: dcen
## Models:
## m.s: LogReactionTime ~ cTask + (1 + cTask | Word) + (1 + cTask | ID.true)
## m: LogReactionTime ~ cAccuracy * cTask + (1 + cTask | Word) + (1 + cTask | ID.true)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## m.s 9 1183.9 1232.1 -582.95 1165.9
## m 11 1186.5 1245.4 -582.24 1164.5 1.4231 2 0.4909
# Per-participant mean accuracy by Task, with participants ordered by their
# mean accuracy on the x axis.
agr <- d %>%
  group_by(ID.true, Task) %>%
  reframe(
    MeanAccuracy = mean(Accuracy),
    CILow = ci.low(Accuracy),
    CIHigh = ci.high(Accuracy)
  ) %>%
  mutate(
    YMin = MeanAccuracy - CILow,
    YMax = MeanAccuracy + CIHigh
  )
dodge <- position_dodge(0.9)
ggplot(agr, aes(x = reorder(ID.true, MeanAccuracy), y = MeanAccuracy, fill = Task)) +
  geom_bar(stat = "identity", position = dodge) +
  facet_wrap(~Task) +
  geom_errorbar(aes(ymin = YMin, ymax = YMax), width = 0.25, position = dodge) +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))
# guides(fill = "none")

# Distribution of by-word accuracy per Task, split by Category
# (violins with jittered word-level points).
agr <- d %>%
  group_by(Word, Task, Category) %>%
  reframe(
    MeanAccuracy = mean(Accuracy),
    CILow = ci.low(Accuracy),
    CIHigh = ci.high(Accuracy)
  ) %>%
  mutate(
    YMin = MeanAccuracy - CILow,
    YMax = MeanAccuracy + CIHigh
  )
ggplot(agr, aes(x = Task, y = MeanAccuracy, fill = Category)) +
  geom_violin(trim = FALSE, alpha = 0.4) +
  geom_jitter(shape = 16, position = position_jitter(0.2))
# guides(fill = "none")
# Per-participant mean RT by Task and ConcValCombo condition, participants
# ordered by mean RT.
agr <- d %>%
group_by(ID.true,Task,ConcValCombo) %>%
summarize(MeanReactionTime = mean(ReactionTime),
CILow = ci.low(ReactionTime),
CIHigh = ci.high(ReactionTime)) %>%
mutate(YMin = MeanReactionTime - CILow,
YMax = MeanReactionTime + CIHigh)
## `summarise()` has grouped output by 'ID.true', 'Task'. You can override using
## the `.groups` argument.
dodge = position_dodge(.9)
ggplot(data=agr, aes(x=reorder(ID.true,MeanReactionTime),y=MeanReactionTime,fill=ConcValCombo)) +
geom_bar(position=dodge,stat="identity") +
facet_wrap(~Task) +
geom_errorbar(aes(ymin=YMin,ymax=YMax),width=.25,position=position_dodge(0.9)) +
theme(axis.text.x = element_text(angle = 45, hjust = 1))
# guides(fill = "none")
# Proportion of "concrete" responses in the Concrete task by condition and
# Category. Response.n codes "abstract" as 0 and "concrete" as 1.
agr <- d %>%
filter(Task == "Concrete") %>%
mutate(Response.n = as.numeric(factor(Response, levels = c("abstract", "concrete"))) - 1) %>%
group_by(ConcValCombo,Category) %>%
summarize(PropConcrete = mean(Response.n),
CILow = ci.low(Response.n),
CIHigh = ci.high(Response.n)) %>%
mutate(YMin = PropConcrete - CILow,
YMax = PropConcrete + CIHigh)
## `summarise()` has grouped output by 'ConcValCombo'. You can override using the
## `.groups` argument.
dodge = position_dodge(.9)
ggplot(data=agr, aes(x=Category,y=PropConcrete,fill=ConcValCombo)) +
geom_bar(position=dodge,stat="identity") +
# facet_wrap(~Version) +
geom_errorbar(aes(ymin=YMin,ymax=YMax),width=.25,position=position_dodge(0.9))
# theme(axis.text.x = element_text(angle = 45, hjust = 1))
# By-word proportion of "concrete" responses against mean RT (Concrete task).
agr <- d %>%
filter(Task == "Concrete") %>%
mutate(Response.n = as.numeric(factor(Response, levels = c("abstract", "concrete"))) - 1) %>%
group_by(Word,ConcValCombo) %>%
summarize(PropConcrete = mean(Response.n),
MeanReactionTime = mean(ReactionTime))
## `summarise()` has grouped output by 'Word'. You can override using the
## `.groups` argument.
ggplot(agr, aes(x = MeanReactionTime, y = PropConcrete)) +
geom_point() +
geom_smooth(method = "lm", se = FALSE, color = "red") +
geom_text(aes(label = Word, color = ConcValCombo), vjust = -0.5, hjust = 1.5)
## `geom_smooth()` using formula = 'y ~ x'
# geom_text_repel(aes(label = Word, color = ConcValCombo),
# vjust = -0.5, hjust = 1.5) +
# scale_fill_manual(values=cbPalette) +
# scale_color_manual(values=cbPalette)
# By-word proportion of "positive" responses against mean RT (Valence task).
# Response.n codes "negative" as 0 and "positive" as 1.
agr <- d %>%
filter(Task == "Valence") %>%
mutate(Response.n = as.numeric(factor(Response, levels = c("negative", "positive"))) - 1) %>%
group_by(Word,ConcValCombo) %>%
summarize(PropPositive = mean(Response.n),
MeanReactionTime = mean(ReactionTime))
## `summarise()` has grouped output by 'Word'. You can override using the
## `.groups` argument.
ggplot(agr, aes(x = MeanReactionTime, y = PropPositive)) +
geom_point() +
geom_smooth(method = "lm", se = FALSE, color = "red") +
geom_text(aes(label = Word, color = ConcValCombo), vjust = -0.5, hjust = 1.5)
## `geom_smooth()` using formula = 'y ~ x'
# geom_text_repel(aes(label = Word, color = ConcValCombo),
# vjust = -0.5, hjust = 1.5) +
# scale_fill_manual(values=cbPalette) +
# scale_color_manual(values=cbPalette)
# By-word accuracy against RT across all words, faceted by Task, with word
# labels colored by Category.
agr <- d %>%
# filter(Task == "Concrete") %>%
# mutate(Response.n = as.numeric(factor(Response, levels = c("abstract", "concrete"))) - 1) %>%
group_by(Word,Task,Category) %>%
summarize(MeanAccuracy = mean(Accuracy),
MeanReactionTime = mean(ReactionTime))
## `summarise()` has grouped output by 'Word', 'Task'. You can override using the
## `.groups` argument.
ggplot(agr, aes(x = MeanReactionTime, y = MeanAccuracy)) +
geom_point() +
geom_smooth(method = "lm", se = FALSE, color = "black") +
facet_wrap(~Task) +
geom_text(aes(label = Word, color = Category), vjust = -0.5, hjust = 1.5) +
# BUG FIX: guides(legend = "none") is invalid -- "legend" is not an
# aesthetic, so that call silently did nothing and the legend stayed.
# Suppress the color guide explicitly.
guides(color = "none")
## `geom_smooth()` using formula = 'y ~ x'
# theme(
# legend.position = "top", # Move legend to the top
# legend.title = element_text(size = 10), # Adjust legend title size
# legend.text = element_text(size = 9) # Adjust legend text size
# )
# geom_text_repel(aes(label = Word, color = ConcValCombo),
# vjust = -0.5, hjust = 1.5) +
# scale_fill_manual(values=cbPalette) +
# scale_color_manual(values=cbPalette)
# ggsave("../graphs/exp3_accXrt.pdf",width = 5, height = 3)
# First remove inaccurate participants
length(unique(d$ID.true))
## [1] 60
# Participants with any Task x Category cell below 75% accuracy.
inacc.parts <- d %>%
group_by(ID.true,Task,Category) %>%
summarise(MeanAccuracy = mean(Accuracy)) %>%
filter(MeanAccuracy < .75)
## `summarise()` has grouped output by 'ID.true', 'Task'. You can override using
## the `.groups` argument.
# How many participants have Accuracy < .75?
length(unique(inacc.parts$ID.true))
## [1] 8
# Remove them
# anti_join on ID.true drops every trial from any participant with at least
# one low-accuracy cell.
d.inaccurate.removed <- d %>%
anti_join(inacc.parts, by = "ID.true")
# Sanity check
length(unique(d.inaccurate.removed$ID.true))
## [1] 52
# Second, remove all inaccurate trials
orig <- nrow(d.inaccurate.removed)
d.inaccurate.removed <- d.inaccurate.removed %>%
filter(Accuracy == 1)
# Percentage of trials retained after dropping error trials.
nrow(d.inaccurate.removed)/orig*100
## [1] 93.46154
# Third, Remove subjects with ReactionTime higher than 3x IQR
# (outlier trimming on the log-RT scale, applied to both tails).
summary(d.inaccurate.removed$LogReactionTime)
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 2.303 6.435 6.613 6.705 6.886 10.619
# Values from a previous run, kept for reference:
# Min. 1st Qu. Median Mean 3rd Qu. Max.
# 6.924 7.328 7.436 7.479 7.579 10.008
range(d.inaccurate.removed$LogReactionTime)
## [1] 2.302585 10.618714
hist(d.inaccurate.removed$LogReactionTime, breaks=100, col="lightblue", xlab="LogReactionTime (ms)",
main="Histogram with Normal Curve")
quantile(d.inaccurate.removed$LogReactionTime, na.rm = TRUE)
## 0% 25% 50% 75% 100%
## 2.302585 6.434547 6.613384 6.885765 10.618714
IQR(d.inaccurate.removed$LogReactionTime, na.rm = TRUE)*3 # width of the 3 x IQR fence
## [1] 1.353656
# Fences: upper = Q3 + 3*IQR, lower = Q1 - 3*IQR.
cutoff.high <- quantile(d.inaccurate.removed$LogReactionTime, na.rm = TRUE)[4] + IQR(d.inaccurate.removed$LogReactionTime, na.rm = TRUE)*3 # Q3 + 3*IQR
cutoff.low <- quantile(d.inaccurate.removed$LogReactionTime, na.rm = TRUE)[2] - IQR(d.inaccurate.removed$LogReactionTime, na.rm = TRUE)*3 # Q1 - 3*IQR
# remove subjects with ReactionTime higher than 3 x IQR
# NOTE(review): this removes individual trials outside the fences, not whole
# subjects, despite the comment above.
df.outliers.removed <- subset(d.inaccurate.removed, (d.inaccurate.removed$LogReactionTime > cutoff.low) & (d.inaccurate.removed$LogReactionTime < cutoff.high))
hist(df.outliers.removed$LogReactionTime, breaks=100, col="lightblue", xlab="LogReactionTime (ms)",
main="Histogram with Normal Curve")
ggplot(df.outliers.removed, aes(x=LogReactionTime, fill=Task)) +
# facet_wrap(~BlockOrder) +
geom_density(alpha = .4)
ggplot(df.outliers.removed, aes(x=ReactionTime, fill=Task)) +
# facet_wrap(~BlockOrder) +
geom_density(alpha = .4)
# RT descriptives (raw and log scale) per Task x Category cell, computed on
# the accuracy-filtered data (before RT outlier trimming).
agr <- d.inaccurate.removed %>%
group_by(Task,Category) %>%
summarize(MeanRT = mean(ReactionTime),
SD = sd(ReactionTime),
MeanLogRT = mean(LogReactionTime))
## `summarise()` has grouped output by 'Task'. You can override using the
## `.groups` argument.
print(agr)
## # A tibble: 6 × 5
## # Groups: Task [2]
## Task Category MeanRT SD MeanLogRT
## <chr> <fct> <dbl> <dbl> <dbl>
## 1 Concrete Adjs 1033. 762. 6.81
## 2 Concrete Nouns 918. 715. 6.72
## 3 Concrete Verbs 1071. 1091. 6.85
## 4 Valence Adjs 799. 387. 6.61
## 5 Valence Nouns 773. 687. 6.57
## 6 Valence Verbs 866. 406. 6.69
# By-word mean log RT per Task (outlier-trimmed data) with bootstrapped CIs;
# shown as a density plot and as violins with jittered word-level points.
agr <- df.outliers.removed %>%
  group_by(Task, Word) %>%
  summarize(MeanLogReactionTime = mean(LogReactionTime),
            CILow = ci.low(LogReactionTime),
            CIHigh = ci.high(LogReactionTime)) %>%
  mutate(YMin = MeanLogReactionTime - CILow,
         YMax = MeanLogReactionTime + CIHigh)
## `summarise()` has grouped output by 'Task'. You can override using the
## `.groups` argument.
ggplot(agr, aes(x = MeanLogReactionTime, fill = Task)) +
  geom_density(alpha = 0.4)
ggplot(agr, aes(x = Task, y = MeanLogReactionTime, fill = Task)) +
  geom_violin(trim = FALSE, alpha = 0.4) +
  geom_jitter(shape = 16, position = position_jitter(0.2)) +
  # geom_errorbar(aes(ymin=YMin,ymax=YMax),width=.25,position="dodge", show.legend = FALSE) +
  # theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
  guides(fill = "none")

# Same two plots on the raw (untransformed) RT scale.
agr <- df.outliers.removed %>%
  group_by(Task, Word) %>%
  summarize(MeanReactionTime = mean(ReactionTime),
            CILow = ci.low(ReactionTime),
            CIHigh = ci.high(ReactionTime)) %>%
  mutate(YMin = MeanReactionTime - CILow,
         YMax = MeanReactionTime + CIHigh)
## `summarise()` has grouped output by 'Task'. You can override using the
## `.groups` argument.
ggplot(agr, aes(x = MeanReactionTime, fill = Task)) +
  geom_density(alpha = 0.4)
ggplot(agr, aes(x = Task, y = MeanReactionTime, fill = Task)) +
  geom_violin(trim = FALSE, alpha = 0.4) +
  geom_jitter(shape = 16, position = position_jitter(0.2)) +
  # geom_errorbar(aes(ymin=YMin,ymax=YMax),width=.25,position="dodge", show.legend = FALSE) +
  # theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
  guides(fill = "none")
# ggsave("../graphs/total_rt_violin.pdf",width = 4, height = 3)
# By-word proportion of "concrete" responses on the FULL data (prop) vs. the
# trimmed data (agr); the plot uses the trimmed version.
# NOTE(review): `prop` is not used in the code shown here -- presumably kept
# for comparison; verify it is needed.
prop <- d %>%
filter(Task == "Concrete") %>%
mutate(Response.n = as.numeric(factor(Response, levels = c("abstract", "concrete"))) - 1) %>%
group_by(Word) %>%
summarize(PropConcrete = mean(Response.n))
agr <- df.outliers.removed %>%
filter(Task == "Concrete") %>%
mutate(Response.n = as.numeric(factor(Response, levels = c("abstract", "concrete"))) - 1) %>%
group_by(Word,ConcValCombo) %>%
summarize(PropConcrete = mean(Response.n),
MeanReactionTime = mean(ReactionTime))
## `summarise()` has grouped output by 'Word'. You can override using the
## `.groups` argument.
ggplot(agr, aes(x = MeanReactionTime, y = PropConcrete)) +
geom_point() +
geom_smooth(method = "lm", se = FALSE, color = "red") +
geom_text(aes(label = Word,color=ConcValCombo), vjust = -0.5, hjust = 1.5)
## `geom_smooth()` using formula = 'y ~ x'
# geom_text_repel(aes(label = Word, color = ConcValCombo),
# vjust = -0.5, hjust = 1.5) +
# scale_fill_manual(values=cbPalette) +
# scale_color_manual(values=cbPalette)
# Yes, it looks like choosing "negative" is faster than choosing "positive",
# and, just barely, choosing "abstract" has a negative effect on RT.
# Mean RT per Task x Category cell (outlier-trimmed data), with categories
# ordered by mean RT on the x axis.
agr <- df.outliers.removed %>%
  group_by(Task, Category) %>%
  reframe(
    MeanReactionTime = mean(ReactionTime),
    CILow = ci.low(ReactionTime),
    CIHigh = ci.high(ReactionTime)
  ) %>%
  mutate(
    YMin = MeanReactionTime - CILow,
    YMax = MeanReactionTime + CIHigh
  )
dodge <- position_dodge(0.9)
ggplot(agr, aes(x = reorder(Category, MeanReactionTime), y = MeanReactionTime, fill = Task)) +
  geom_bar(stat = "identity", position = dodge) +
  # facet_wrap(~Task) +
  geom_errorbar(aes(ymin = YMin, ymax = YMax), width = 0.25, position = dodge) +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))
# guides(fill = "none")

# By-word RT distributions per Category, split by Task (violin + jitter).
agr <- df.outliers.removed %>%
  group_by(Word, Task, Category) %>%
  reframe(
    MeanReactionTime = mean(ReactionTime),
    CILow = ci.low(ReactionTime),
    CIHigh = ci.high(ReactionTime)
  ) %>%
  mutate(
    YMin = MeanReactionTime - CILow,
    YMax = MeanReactionTime + CIHigh
  )
ggplot(agr, aes(x = Category, y = MeanReactionTime, fill = Task)) +
  geom_violin(trim = FALSE, alpha = 0.4) +
  geom_jitter(shape = 16, position = position_jitter(0.2)) +
  guides(fill = "none")
# Identify words whose mean RT in the Concrete task is at most their mean RT
# in the Valence task (the atypical direction), then reshape back to long
# format so each retained word has one row per Task.
test <- df.outliers.removed %>%
# filter(Task %in% c("Concrete", "Valence")) %>% # Keep only relevant tasks
group_by(Word, Task) %>%
summarise(
RT = mean(ReactionTime, na.rm = TRUE), # Take the mean RT if duplicates exist
.groups = "drop_last" # Drop grouping by Task but keep Word and ID.true
) %>%
pivot_wider(names_from = Task, values_from = RT, names_prefix = "RT_") %>% # Reshape to wide format
filter(RT_Concrete <= RT_Valence) %>% # Apply the condition
pivot_longer(
cols = starts_with("RT_"), # Select the reshaped columns
names_to = "Task", # Restore Task column
names_prefix = "RT_", # Remove "RT_" prefix to match original Task names
values_to = "ReactionTime" # Column for the RT values
) %>%
ungroup()
# NOTE(review): `test` has two rows per retained word (one per Task), while
# df.outliers.removed has one row per trial -- this ratio compares different
# units, so interpret it with care.
nrow(test)/nrow(df.outliers.removed)
## [1] 0.001553733
# Per-word RT summary for the flagged words.
# NOTE(review): `test` holds one (already averaged) RT per Word x Task, so
# mean() is the identity here and ci.low()/ci.high() operate on a single
# value -- the resulting error bars are likely degenerate; confirm intent.
agr <- test %>%
group_by(Word, Task) %>%
summarize(MeanReactionTime = mean(ReactionTime),
CILow = ci.low(ReactionTime),
CIHigh = ci.high(ReactionTime)) %>%
mutate(YMin = MeanReactionTime - CILow,
YMax = MeanReactionTime + CIHigh)
## `summarise()` has grouped output by 'Word'. You can override using the
## `.groups` argument.
# Small multiples: mean RT by task for each flagged word, with CI error bars.
dodge <- position_dodge(0.9)
ggplot(agr, aes(x = Task, y = MeanReactionTime, fill = Task)) +
  geom_bar(stat = "identity", position = dodge) +
  facet_wrap(~Word) +
  geom_errorbar(aes(ymin = YMin, ymax = YMax), width = 0.25, position = dodge) +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))
# guides(fill = "none")
# Dodged bar chart: mean RT per flagged word, by task, with CI error bars.
# Fixes relative to the original:
#  - guides(fill = FALSE) is deprecated since ggplot2 3.3.4 (the knitted
#    warning below documents this) -> use guides(fill = "none").
#  - The axis labels read "ConcValCombo"/"MeanAccuracy", but the plot maps
#    Word to x and MeanReactionTime to y -> labels corrected.
#  - scale_alpha_discrete() on a discrete variable triggers a warning; Task
#    has exactly two levels (see str(df_factors)), so
#    scale_alpha_manual(values = c(.5, 1)) produces the same alphas.
ggplot(agr, aes(x = Word, y = MeanReactionTime, alpha = Task, fill = Task)) +
  geom_bar(position = "dodge", stat = "identity") +
  geom_errorbar(aes(ymin = YMin, ymax = YMax), width = .25, position = position_dodge(0.9)) +
  # facet_wrap(~Task, ncol=10) +
  xlab("Word") +
  ylab("MeanReactionTime") +
  guides(fill = "none") +
  # guides(alpha=guide_legend(title="Task")) +
  theme(legend.key.size = unit(0.3, "cm"),
        legend.position = "top", # c(.5,1)
        legend.direction = "horizontal",
        legend.margin = margin(0, 0, 0, 0),
        legend.box.margin = margin(0, 0, -5, -5),
        legend.spacing.y = unit(0.001, 'cm')) +
  # scale_fill_manual(values=cbPalette) +
  # scale_color_manual(values=cbPalette) +
  scale_alpha_manual(values = c(.5, 1)) +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))
## Warning: The `<scale>` argument of `guides()` cannot be `FALSE`. Use "none" instead as
## of ggplot2 3.3.4.
## This warning is displayed once every 8 hours.
## Call `lifecycle::last_lifecycle_warnings()` to see where this warning was
## generated.
## Warning: Using alpha for a discrete variable is not advised.
# Accuracy summary (with CI bounds) for the flagged words, computed on the
# full data `d` rather than the outlier-trimmed frame.
test_avv <- d %>%
filter(Word %in% test$Word) %>%
group_by(Word, Task) %>%
summarize(MeanAccuracy = mean(Accuracy),
CILow = ci.low(Accuracy),
CIHigh = ci.high(Accuracy),
) %>%
# CI half-widths -> absolute error-bar limits.
mutate(YMin = MeanAccuracy - CILow,
YMax = MeanAccuracy + CIHigh)
## `summarise()` has grouped output by 'Word'. You can override using the
## `.groups` argument.
# View(test_avv)
# Small multiples: mean accuracy by task for each flagged word,
# with CI error bars.
dodge <- position_dodge(0.9)
ggplot(test_avv, aes(x = Task, y = MeanAccuracy, fill = Task)) +
  geom_bar(stat = "identity", position = dodge) +
  facet_wrap(~Word) +
  geom_errorbar(aes(ymin = YMin, ymax = YMax), width = 0.25, position = dodge) +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))
# guides(fill = "none")
# Per-word mean accuracy and mean RT for the flagged words (scatter input).
test_avv <- d %>%
filter(Word %in% test$Word) %>%
group_by(Word, Task) %>%
summarize(MeanAccuracy = mean(Accuracy),
MeanReactionTime = mean(ReactionTime))
## `summarise()` has grouped output by 'Word'. You can override using the
## `.groups` argument.
# Accuracy vs. RT scatter for the flagged words, with a linear fit and
# non-overlapping word labels colored by task.
ggplot(data = test_avv, aes(x = MeanReactionTime, y = MeanAccuracy)) +
  geom_point() +
  geom_smooth(se = FALSE, method = "lm", color = "red") +
  # geom_text(aes(label = Word, color = Task), vjust = -0.5, hjust = 1.5)
  geom_text_repel(aes(label = Word, color = Task),
                  hjust = 1.5, vjust = -0.5) +
  scale_fill_manual(values = cbPalette) +
  scale_color_manual(values = cbPalette)
## `geom_smooth()` using formula = 'y ~ x'
# Top 10 words by mean accuracy on the Concrete task.
# Fix: the original also computed MeanReactionTime in summarize(), but
# select() immediately discarded it -- the dead computation is removed.
# Note: after summarize() the result is still grouped by Word; arrange()
# sorts globally regardless, and head(10) takes the first 10 rows.
concrete_accuracy <- d %>%
  group_by(Word,Task) %>%
  summarize(MeanAccuracy = mean(Accuracy)) %>%
  filter(Task == "Concrete") %>%
  select(Word, MeanAccuracy) %>%
  rename(ConcreteAccuracy = MeanAccuracy) %>%
  arrange(desc(ConcreteAccuracy)) %>%
  head(10)
## `summarise()` has grouped output by 'Word'. You can override using the
## `.groups` argument.
# Mean accuracy and mean RT per Word x Task for the top-accuracy words,
# computed on the outlier-trimmed data.
agr <- df.outliers.removed %>%
filter(Word %in% concrete_accuracy$Word) %>%
group_by(Word,Task) %>%
summarize(MeanAccuracy = mean(Accuracy),
MeanReactionTime = mean(ReactionTime))
## `summarise()` has grouped output by 'Word'. You can override using
## the `.groups` argument.
print(agr)
## # A tibble: 20 × 4
## # Groups: Word [10]
## Word Task MeanAccuracy MeanReactionTime
## <chr> <chr> <dbl> <dbl>
## 1 chocolate Concrete 1 747.
## 2 chocolate Valence 1 697.
## 3 compassion Concrete 1 788.
## 4 compassion Valence 1 815.
## 5 despair Concrete 1 818.
## 6 despair Valence 1 746.
## 7 fireplace Concrete 1 771.
## 8 fireplace Valence 1 813.
## 9 grief Concrete 1 861.
## 10 grief Valence 1 769.
## 11 inspiration Concrete 1 800.
## 12 inspiration Valence 1 782.
## 13 kindness Concrete 1 947.
## 14 kindness Valence 1 635.
## 15 kiss Concrete 1 877.
## 16 kiss Valence 1 831.
## 17 mud Concrete 1 906.
## 18 mud Valence 1 770.
## 19 shame Concrete 1 849.
## 20 shame Valence 1 705.
# Accuracy vs. RT scatter for the top-accuracy words, with a linear fit
# and word labels colored by task.
ggplot(data = agr, aes(x = MeanReactionTime, y = MeanAccuracy)) +
  geom_point() +
  geom_smooth(se = FALSE, method = "lm", color = "black") +
  geom_text(aes(label = Word, color = Task), hjust = 1.5, vjust = -0.5)
## `geom_smooth()` using formula = 'y ~ x'
# guides(legend = "none")
# theme(legend.position = "none") # Remove the legend
# ggsave("../graphs/exp1b_accXrt.pdf",width = 5, height = 3)
# Per-word mean RT with CI bounds for the top-accuracy words.
agr <- df.outliers.removed %>%
filter(Word %in% concrete_accuracy$Word) %>%
group_by(Word, Task) %>%
summarize(MeanReactionTime = mean(ReactionTime),
CILow = ci.low(ReactionTime),
CIHigh = ci.high(ReactionTime)) %>%
# CI half-widths -> absolute error-bar limits.
mutate(YMin = MeanReactionTime - CILow,
YMax = MeanReactionTime + CIHigh)
## `summarise()` has grouped output by 'Word'. You can override using the
## `.groups` argument.
print(agr)
## # A tibble: 20 × 7
## # Groups: Word [10]
## Word Task MeanReactionTime CILow CIHigh YMin YMax
## <chr> <chr> <dbl> <dbl> <dbl> <dbl> <dbl>
## 1 chocolate Concrete 747. 67.1 75.9 680. 823.
## 2 chocolate Valence 697. 52.6 54.4 645. 752.
## 3 compassion Concrete 788. 60.4 73.4 728. 861.
## 4 compassion Valence 815. 101. 122. 714. 936.
## 5 despair Concrete 818. 101. 140. 717. 958.
## 6 despair Valence 746. 61.5 70.6 684. 816.
## 7 fireplace Concrete 771. 60.9 70.5 710. 842.
## 8 fireplace Valence 813. 66.7 73.0 746. 886.
## 9 grief Concrete 861. 96.8 114. 764. 976.
## 10 grief Valence 769. 87.6 110. 682. 880.
## 11 inspiration Concrete 800. 67.4 73.3 733. 874.
## 12 inspiration Valence 782. 63.3 73.8 718. 855.
## 13 kindness Concrete 947. 132. 146. 816. 1094.
## 14 kindness Valence 635. 35.4 34.5 599. 669.
## 15 kiss Concrete 877. 97.9 112. 779. 990.
## 16 kiss Valence 831. 92.2 114. 739. 945.
## 17 mud Concrete 906. 124. 126. 782. 1033.
## 18 mud Valence 770. 80.8 94.7 689. 864.
## 19 shame Concrete 849. 84.5 94.0 764. 943.
## 20 shame Valence 705. 45.3 48.5 660. 754.
# Dodged bar chart: mean RT by word and task for the top-accuracy words,
# with CI error bars.
dodge <- position_dodge(0.9)
ggplot(agr, aes(x = Word, y = MeanReactionTime, fill = Task)) +
  geom_bar(stat = "identity", position = dodge) +
  # facet_wrap(~Word) +
  geom_errorbar(aes(ymin = YMin, ymax = YMax), width = 0.25, position = dodge) +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))
# guides(fill = "none")
# Raw RT distributions (violins) for the top-accuracy words, split by task;
# a dodged open point marks each distribution's median.
agr <- df.outliers.removed %>%
  filter(Word %in% concrete_accuracy$Word)
ggplot(data = agr, aes(x = Word, y = ReactionTime, fill = Task)) +
  geom_violin(alpha = 0.4, trim = FALSE) +
  # Median marker, dodged to sit over its own violin
  stat_summary(geom = "point", fun = median,
               size = 1.5, shape = 21,
               position = position_dodge(width = 0.9)) +
  # geom_jitter(shape = 10, position = position_jitter(0.2)) +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))
Convert everything to factors? Yes — the factor-coded data frame (`df_factors`) is used for the models below; see `str(df_factors)`.
# LMM: does Task (Concrete vs. Valence) predict log RT?
# Random intercepts for participant (ID.true) and word.
# NOTE(review): `center` is presumably the centered modeling data frame
# prepared earlier in the file -- confirm.
m = lmer(LogReactionTime ~ Task + (1|ID.true) + (1|Word), data=center)
summary(m)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: LogReactionTime ~ Task + (1 | ID.true) + (1 | Word)
## Data: center
##
## REML criterion at convergence: 5191.1
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.5057 -0.6543 -0.1714 0.4628 5.0814
##
## Random effects:
## Groups Name Variance Std.Dev.
## Word (Intercept) 0.002588 0.05087
## ID.true (Intercept) 0.036627 0.19138
## Residual 0.088528 0.29754
## Number of obs: 11585, groups: Word, 117; ID.true, 52
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 6.776e+00 2.726e-02 5.530e+01 248.60 <2e-16 ***
## TaskValence -1.555e-01 5.546e-03 1.145e+04 -28.04 <2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## TaskValence -0.105
table(center$Category)
##
## Adjs Nouns Verbs
## 3073 4239 4273
# LMM: does syntactic Category (Adjs/Nouns/Verbs) predict log RT?
# Random intercepts for participant and word.
m = lmer(LogReactionTime ~ Category + (1|ID.true) + (1|Word), data=center)
summary(m)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: LogReactionTime ~ Category + (1 | ID.true) + (1 | Word)
## Data: center
##
## REML criterion at convergence: 5946.5
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.1338 -0.6525 -0.1809 0.4647 5.1642
##
## Random effects:
## Groups Name Variance Std.Dev.
## Word (Intercept) 0.002364 0.04862
## ID.true (Intercept) 0.035268 0.18780
## Residual 0.094659 0.30767
## Number of obs: 11585, groups: Word, 117; ID.true, 52
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 6.70018 0.05108 51.28017 131.170 <2e-16 ***
## CategoryNouns -0.06628 0.06739 51.49656 -0.984 0.330
## CategoryVerbs 0.05418 0.06739 51.49045 0.804 0.425
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) CtgryN
## CategoryNns -0.758
## CategryVrbs -0.758 0.576
# LMM: Task x Category interaction on log RT (main effects + interaction),
# random intercepts for participant and word.
m = lmer(LogReactionTime ~ Task*Category + (1|ID.true) + (1|Word), data=center)
summary(m)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: LogReactionTime ~ Task * Category + (1 | ID.true) + (1 | Word)
## Data: center
##
## REML criterion at convergence: 5202.6
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.4944 -0.6547 -0.1680 0.4643 5.0992
##
## Random effects:
## Groups Name Variance Std.Dev.
## Word (Intercept) 0.002581 0.0508
## ID.true (Intercept) 0.035509 0.1884
## Residual 0.088497 0.2975
## Number of obs: 11585, groups: Word, 117; ID.true, 52
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 6.793e+00 5.159e-02 5.273e+01 131.679 <2e-16 ***
## TaskValence -1.781e-01 1.078e-02 1.145e+04 -16.523 <2e-16 ***
## CategoryNouns -8.143e-02 6.806e-02 5.293e+01 -1.197 0.2368
## CategoryVerbs 3.424e-02 6.805e-02 5.290e+01 0.503 0.6169
## TaskValence:CategoryNouns 2.804e-02 1.416e-02 1.146e+04 1.980 0.0477 *
## TaskValence:CategoryVerbs 3.340e-02 1.411e-02 1.144e+04 2.366 0.0180 *
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) TskVln CtgryN CtgryV TsV:CN
## TaskValence -0.109
## CategoryNns -0.758 0.083
## CategryVrbs -0.758 0.083 0.577
## TskVlnc:CtN 0.083 -0.761 -0.108 -0.063
## TskVlnc:CtV 0.083 -0.764 -0.063 -0.107 0.582
str(df_factors)
## tibble [11,585 × 21] (S3: tbl_df/tbl/data.frame)
## $ ...1 : Factor w/ 4775 levels "1","2","3","4",..: 1 3 4 5 6 7 8 9 10 11 ...
## $ ID.true : Factor w/ 52 levels "5588b04ffdf99b7a91e75ddb",..: 5 5 5 5 5 5 5 5 5 5 ...
## $ Word : Factor w/ 117 levels "admired","annoy",..: 63 86 2 12 90 6 70 101 57 114 ...
## $ Label : Factor w/ 2 levels "test_conc","test_val": 2 2 2 2 2 2 2 2 2 2 ...
## $ ConcValCombo : Factor w/ 4 levels "abstract-negative",..: 1 4 1 4 1 1 3 3 2 3 ...
## $ Task : Factor w/ 2 levels "Concrete","Valence": 2 2 2 2 2 2 2 2 2 2 ...
## $ BlockOrder : Factor w/ 2 levels "CV","VC": 2 2 2 2 2 2 2 2 2 2 ...
## $ Group : Factor w/ 6 levels "A","abstract;concrete",..: 3 3 3 3 3 3 3 3 3 3 ...
## $ Response : Factor w/ 4 levels "abstract","concrete",..: 3 4 3 4 3 3 3 3 4 3 ...
## $ Accuracy : Factor w/ 1 level "1": 1 1 1 1 1 1 1 1 1 1 ...
## $ EventTime : Factor w/ 11584 levels "1732203168805",..: 3709 3710 3711 3712 3713 3714 3715 3716 3717 3718 ...
## $ Value : Factor w/ 4 levels "abstract","concrete",..: 3 4 3 4 3 3 3 3 4 3 ...
## $ RT : Factor w/ 1691 levels "1270","1272.16666666667",..: 1184 1691 1380 1653 1444 1532 1364 1357 1166 1187 ...
## $ ReactionTime : num [1:11585] 1164 1184 1660 1157 1264 ...
## $ Key_value_F : Factor w/ 4 levels "abstract","concrete",..: 3 3 3 3 3 3 3 3 3 3 ...
## $ Key_value_J : Factor w/ 2 levels "A","B": 2 2 2 2 2 2 2 2 2 2 ...
## $ Comments : Factor w/ 0 levels: NA NA NA NA NA NA NA NA NA NA ...
## $ LogReactionTime: num [1:11585] 7.06 7.08 7.41 7.05 7.14 ...
## $ LogRT : Factor w/ 1691 levels "7.14677217945264",..: 1184 1691 1380 1653 1444 1532 1364 1357 1166 1187 ...
## $ TrialNumber : Factor w/ 240 levels "1","2","3","4",..: 1 3 4 5 6 7 8 9 10 11 ...
## $ Category : Factor w/ 3 levels "Adjs","Nouns",..: 3 3 3 3 3 3 3 3 3 3 ...
# Concreteness-task subset with numeric/centered predictors for the lmer
# models. Consistency fix: the Concrete recode now uses case_when(), matching
# the parallel `val` block's style; behavior is identical to the nested
# ifelse() it replaces (non-matching rows become NA either way).
conc <- df_factors %>%
  filter(Task == "Concrete") %>%
  mutate(
    # 0 = "abstract" response, 1 = "concrete"; other response levels -> NA.
    Response.n = as.numeric(factor(Response, levels = c("abstract", "concrete"))) - 1,
    # Word-level concreteness extracted from the ConcValCombo label.
    Concrete = case_when(
      grepl("abstract", ConcValCombo) ~ "abstract",
      grepl("concrete", ConcValCombo) ~ "concrete",
      TRUE ~ NA_character_
    ),
    # Centered numeric codings: subtract the sample mean of the factor codes.
    cConcValCombo = as.numeric(ConcValCombo) - mean(as.numeric(ConcValCombo)),
    cConcrete = as.numeric(as.factor(Concrete)) - mean(as.numeric(as.factor(Concrete))),
    # cSyntactic = as.numeric(factor(Syntactic)) - mean(as.numeric(factor(Syntactic)))
  )
# LMM on the concreteness-task subset: centered ConcValCombo predictor with
# by-participant and by-word random slopes (maximal for this predictor).
m = lmer(LogReactionTime ~ cConcValCombo + (1+cConcValCombo|ID.true) + (1+cConcValCombo|Word), data=conc)
summary(m)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: LogReactionTime ~ cConcValCombo + (1 + cConcValCombo | ID.true) +
## (1 + cConcValCombo | Word)
## Data: conc
##
## REML criterion at convergence: 3137.2
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.6179 -0.6346 -0.1791 0.4717 4.8591
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## Word (Intercept) 0.002293 0.04788
## cConcValCombo 0.002611 0.05110 0.85
## ID.true (Intercept) 0.043548 0.20868
## cConcValCombo 0.003030 0.05504 -0.19
## Residual 0.094883 0.30803
## Number of obs: 5624, groups: Word, 117; ID.true, 52
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 6.7908831 0.0298862 55.2507060 227.224 <2e-16 ***
## cConcValCombo -0.0007486 0.0106036 81.8385078 -0.071 0.944
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## cConcValCmb -0.051
No significant effect of word concreteness (p = .94).
# LMM: centered word concreteness; random slope by participant only
# (no by-word slope -- concreteness is a between-word property).
m = lmer(LogReactionTime ~ cConcrete + (1+cConcrete|ID.true) + (1|Word), data=conc)
summary(m)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: LogReactionTime ~ cConcrete + (1 + cConcrete | ID.true) + (1 |
## Word)
## Data: conc
##
## REML criterion at convergence: 3118.4
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.6385 -0.6448 -0.1755 0.4805 4.9471
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## Word (Intercept) 0.004746 0.06889
## ID.true (Intercept) 0.043091 0.20758
## cConcrete 0.016485 0.12839 -0.18
## Residual 0.094308 0.30710
## Number of obs: 5624, groups: Word, 117; ID.true, 52
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 6.78218 0.02979 55.77945 227.655 <2e-16 ***
## cConcrete -0.02065 0.02349 83.70034 -0.879 0.382
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## cConcrete -0.136
No significant effect of response choice (p = .25).
# LMM: does the response actually chosen (0 = abstract, 1 = concrete)
# predict log RT? Random intercepts for participant and word.
m = lmer(LogReactionTime ~ Response.n + (1|ID.true) + (1|Word), data=conc)
summary(m)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: LogReactionTime ~ Response.n + (1 | ID.true) + (1 | Word)
## Data: conc
##
## REML criterion at convergence: 3239.9
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.3853 -0.6470 -0.1793 0.4899 4.9199
##
## Random effects:
## Groups Name Variance Std.Dev.
## Word (Intercept) 0.005636 0.07507
## ID.true (Intercept) 0.042882 0.20708
## Residual 0.097689 0.31255
## Number of obs: 5624, groups: Word, 117; ID.true, 52
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 6.78896 0.03091 64.64002 219.613 <2e-16 ***
## Response.n -0.01904 0.01631 103.90862 -1.168 0.246
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## Response.n -0.258
# Valence-task subset with numeric/centered predictors for the lmer models
# (parallel to the `conc` block above).
val <- df.outliers.removed %>%
filter(Task == "Valence") %>%
filter(!is.na(ConcValCombo)) %>%
# 0 = "negative" response, 1 = "positive"; other response levels -> NA.
mutate(Response.n = as.numeric(factor(Response, levels = c("negative", "positive"))) - 1,
# Word-level valence extracted from the ConcValCombo label.
Valence = case_when(
grepl("negative", ConcValCombo) ~ "negative",
grepl("positive", ConcValCombo) ~ "positive",
TRUE ~ NA_character_
),
# Valence = ifelse(grepl("negative", ConcValCombo), "negative",
# ifelse(grepl("positive", ConcValCombo), "positive", NA)),
# Centered numeric codings: subtract the sample mean of the factor codes.
cConcValCombo = as.numeric(as.factor(ConcValCombo)) - mean(as.numeric(as.factor(ConcValCombo))),
cValence = as.numeric(as.factor(Valence)) - mean(as.numeric(as.factor(Valence)))
)
# Sanity checks on the valence subset before modeling.
sum(is.na(val$ConcValCombo)) # Count missing values
## [1] 0
sum(is.na(val$LogReactionTime)) # Check for missing LogReactionTime
## [1] 0
# Variance of the centered 4-level coding (non-zero -> predictor varies).
var(val$cConcValCombo)
## [1] 1.245257
unique(val$ConcValCombo)
## [1] "abstract-negative" "concrete-positive" "concrete-negative"
## [4] "abstract-positive"
# View(val)
# valna <- val %>%
# filter(is.na(val$ConcValCombo))
# LMM on the valence-task subset: centered ConcValCombo predictor with
# maximal (by-participant and by-word) random slopes for this predictor.
m = lmer(LogReactionTime ~ cConcValCombo + (1+cConcValCombo|ID.true) + (1+cConcValCombo|Word), data=val)
summary(m)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: LogReactionTime ~ cConcValCombo + (1 + cConcValCombo | ID.true) +
## (1 + cConcValCombo | Word)
## Data: val
##
## REML criterion at convergence: 1183
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.1019 -0.6208 -0.1584 0.4361 5.6861
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## Word (Intercept) 3.717e-03 0.060968
## cConcValCombo 2.332e-05 0.004829 -1.00
## ID.true (Intercept) 4.033e-02 0.200812
## cConcValCombo 3.050e-04 0.017463 0.08
## Residual 6.666e-02 0.258187
## Number of obs: 5961, groups: Word, 117; ID.true, 52
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 6.621582 0.028628 55.042064 231.295 <2e-16 ***
## cConcValCombo -0.005987 0.006351 105.906807 -0.943 0.348
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## cConcValCmb 0.002
No significant effect of word valence (p = .18).
# LMM: centered word valence; random slope by participant only
# (no by-word slope -- valence is a between-word property).
m = lmer(LogReactionTime ~ cValence + (1+cValence|ID.true) + (1|Word), data=val)
summary(m)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: LogReactionTime ~ cValence + (1 + cValence | ID.true) + (1 |
## Word)
## Data: val
##
## REML criterion at convergence: 1148.7
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.2866 -0.6132 -0.1493 0.4357 5.7974
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## Word (Intercept) 0.003815 0.06177
## ID.true (Intercept) 0.040457 0.20114
## cValence 0.004277 0.06540 0.11
## Residual 0.065952 0.25681
## Number of obs: 5961, groups: Word, 117; ID.true, 52
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 6.62209 0.02868 55.10568 230.882 <2e-16 ***
## cValence -0.02197 0.01610 108.55084 -1.365 0.175
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## cValence 0.063
Marginal effect of response choice (p = .097).
# LMM: does the chosen response (0 = negative, 1 = positive) predict log RT?
# Random intercepts for participant and word.
m = lmer(LogReactionTime ~ Response.n + (1|ID.true) + (1|Word), data=val)
summary(m)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: LogReactionTime ~ Response.n + (1 | ID.true) + (1 | Word)
## Data: val
##
## REML criterion at convergence: 1187.9
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.1176 -0.6150 -0.1554 0.4411 5.7484
##
## Random effects:
## Groups Name Variance Std.Dev.
## Word (Intercept) 0.00372 0.06099
## ID.true (Intercept) 0.04037 0.20093
## Residual 0.06702 0.25887
## Number of obs: 5961, groups: Word, 117; ID.true, 52
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 6.63269 0.02938 60.65279 225.759 <2e-16 ***
## Response.n -0.02209 0.01318 111.12508 -1.676 0.0965 .
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## Response.n -0.223